From 2ad9714117780908103ae957bb943f7f5fccb224 Mon Sep 17 00:00:00 2001 From: "kaf24@pb001.cl.cam.ac.uk" Date: Thu, 16 Dec 2004 15:41:47 +0000 Subject: [PATCH] bitkeeper revision 1.1159.1.488 (41c1acbbhUN0iUWmupmPB85ghWRehg) Many files: x86/64 fixes. --- BitKeeper/etc/logging_ok | 1 + xen/arch/x86/Makefile | 6 ++++++ xen/arch/x86/boot/x86_64.S | 3 +++ xen/arch/x86/domain.c | 2 +- xen/arch/x86/shadow.c | 22 +++++++++++----------- xen/arch/x86/x86_64/asm-offsets.c | 16 ++++++++-------- xen/arch/x86/x86_64/usercopy.c | 2 +- xen/include/asm-x86/config.h | 9 ++++++--- xen/include/asm-x86/mm.h | 4 ++-- xen/include/asm-x86/pda.h | 2 +- xen/include/asm-x86/processor.h | 17 ++++++----------- xen/include/asm-x86/x86_32/current.h | 12 ++++++------ xen/include/asm-x86/x86_64/current.h | 12 ++++++------ 13 files changed, 58 insertions(+), 50 deletions(-) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index cbc3dd3c3e..c7daeeea4b 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -27,6 +27,7 @@ jws@cairnwell.research kaf24@camelot.eng.3leafnetworks.com kaf24@freefall.cl.cam.ac.uk kaf24@labyrinth.cl.cam.ac.uk +kaf24@pb001.cl.cam.ac.uk kaf24@penguin.local kaf24@plym.cl.cam.ac.uk kaf24@scramble.cl.cam.ac.uk diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile index 0062ce7678..55603231a1 100644 --- a/xen/arch/x86/Makefile +++ b/xen/arch/x86/Makefile @@ -12,6 +12,12 @@ OBJS += $(patsubst %.c,%.o,$(wildcard mtrr/*.c)) OBJS := $(subst $(TARGET_SUBARCH)/asm-offsets.o,,$(OBJS)) +ifneq ($(TARGET_SUBARCH),i386) +OBJS := $(subst vmx.o,,$(OBJS)) +OBJS := $(subst vmx_io.o,,$(OBJS)) +OBJS := $(subst vmx_vmcs.o,,$(OBJS)) +endif # ($(TARGET_SUBARCH),i386) + default: boot/$(TARGET_SUBARCH).o $(OBJS) boot/mkelf32 $(LD) $(LDFLAGS) -r -o arch.o $(OBJS) $(LD) $(LDFLAGS) -T $(TARGET_SUBARCH)/xen.lds -N \ diff --git a/xen/arch/x86/boot/x86_64.S b/xen/arch/x86/boot/x86_64.S index a8253a4ce1..875584126e 100644 --- a/xen/arch/x86/boot/x86_64.S +++ 
b/xen/arch/x86/boot/x86_64.S @@ -252,11 +252,14 @@ map_domain_mem: unmap_domain_mem: ret_from_intr: #undef machine_to_phys_mapping +#undef phys_to_machine_mapping .globl copy_to_user, set_intr_gate, die, machine_to_phys_mapping +.globl phys_to_machine_mapping copy_to_user: set_intr_gate: die: machine_to_phys_mapping: +phys_to_machine_mapping: .globl copy_from_user, show_registers, do_iopl copy_from_user: show_registers: diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index 646bbc3aa0..14f85f2687 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -61,7 +61,7 @@ static void default_idle(void) __sti(); } -static void idle_loop(void) +void idle_loop(void) { int cpu = smp_processor_id(); for ( ; ; ) diff --git a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c index 2485376472..58a6fffdc4 100644 --- a/xen/arch/x86/shadow.c +++ b/xen/arch/x86/shadow.c @@ -450,7 +450,7 @@ unsigned long shadow_l2_table( { struct pfn_info *spfn_info; unsigned long spfn; - l2_pgentry_t *spl2e = 0, *gpl2e; + l2_pgentry_t *spl2e = 0; unsigned long guest_gpfn; __get_machine_to_phys(m, guest_gpfn, gpfn); @@ -471,17 +471,19 @@ unsigned long shadow_l2_table( #ifdef __i386__ /* Install hypervisor and 2x linear p.t. mapings. */ - if (m->shadow_mode == SHM_full_32) + if ( m->shadow_mode == SHM_full_32 ) + { vmx_update_shadow_state(m, gpfn, spfn); - else { + } + else + { spl2e = (l2_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT); - // can't use the linear map as we may not be in the right PT - gpl2e = (l2_pgentry_t *) map_domain_mem(gpfn << PAGE_SHIFT); /* - * We could proactively fill in PDEs for pages that are already shadowed. - * However, we tried it and it didn't help performance. This is simpler. + * We could proactively fill in PDEs for pages that are already + * shadowed. However, we tried it and it didn't help performance. + * This is simpler. 
*/ - memset(spl2e, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t)); + memset(spl2e, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE*sizeof(l2_pgentry_t)); /* Install hypervisor and 2x linear p.t. mapings. */ memcpy(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], @@ -497,10 +499,8 @@ unsigned long shadow_l2_table( } #endif - if (m->shadow_mode != SHM_full_32) - { + if ( m->shadow_mode != SHM_full_32 ) unmap_domain_mem(spl2e); - } SH_VLOG("shadow_l2_table( %08lx -> %08lx)", gpfn, spfn); return spfn; diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c index 2e6c3b396e..4ffe1dd6ad 100644 --- a/xen/arch/x86/x86_64/asm-offsets.c +++ b/xen/arch/x86/x86_64/asm-offsets.c @@ -38,14 +38,14 @@ void __dummy__(void) OFFSET(XREGS_ss, struct xen_regs, ss); BLANK(); - OFFSET(DOMAIN_processor, struct domain, processor); - OFFSET(DOMAIN_shared_info, struct domain, shared_info); - OFFSET(DOMAIN_event_sel, struct domain, thread.event_selector); - OFFSET(DOMAIN_event_addr, struct domain, thread.event_address); - OFFSET(DOMAIN_failsafe_sel, struct domain, thread.failsafe_selector); - OFFSET(DOMAIN_failsafe_addr, struct domain, thread.failsafe_address); - OFFSET(DOMAIN_trap_bounce, struct domain, thread.trap_bounce); - OFFSET(DOMAIN_thread_flags, struct domain, thread.flags); + OFFSET(EDOMAIN_processor, struct exec_domain, processor); + OFFSET(EDOMAIN_vcpu_info, struct exec_domain, vcpu_info); + OFFSET(EDOMAIN_event_sel, struct exec_domain, thread.event_selector); + OFFSET(EDOMAIN_event_addr, struct exec_domain, thread.event_address); + OFFSET(EDOMAIN_failsafe_sel, struct exec_domain, thread.failsafe_selector); + OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, thread.failsafe_address); + OFFSET(EDOMAIN_trap_bounce, struct exec_domain, thread.trap_bounce); + OFFSET(EDOMAIN_thread_flags, struct exec_domain, thread.flags); BLANK(); OFFSET(SHINFO_upcall_pending, shared_info_t, diff --git a/xen/arch/x86/x86_64/usercopy.c b/xen/arch/x86/x86_64/usercopy.c index 
e7c11fa501..c060c45890 100644 --- a/xen/arch/x86/x86_64/usercopy.c +++ b/xen/arch/x86/x86_64/usercopy.c @@ -88,7 +88,7 @@ unsigned long __clear_user(void *addr, unsigned long size) " .quad 1b,2b\n" ".previous" : [size8] "=c"(size), [dst] "=&D" (__d0) - : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst] "(addr), + : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr), [zero] "r" (0UL), [eight] "r" (8UL)); return size; } diff --git a/xen/include/asm-x86/config.h b/xen/include/asm-x86/config.h index 1ac2eb358a..8f89051872 100644 --- a/xen/include/asm-x86/config.h +++ b/xen/include/asm-x86/config.h @@ -4,10 +4,13 @@ * A Linux-style configuration list. */ -#ifndef __XEN_I386_CONFIG_H__ -#define __XEN_I386_CONFIG_H__ +#ifndef __X86_CONFIG_H__ +#define __X86_CONFIG_H__ +#ifdef __i386__ #define CONFIG_VMX 1 +#endif + #define CONFIG_X86 1 #define CONFIG_SMP 1 @@ -228,4 +231,4 @@ extern unsigned long xenheap_phys_end; /* user-configurable */ #define ELFSIZE 32 #endif -#endif /* __XEN_I386_CONFIG_H__ */ +#endif /* __X86_CONFIG_H__ */ diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h index e392d588ca..652084f117 100644 --- a/xen/include/asm-x86/mm.h +++ b/xen/include/asm-x86/mm.h @@ -215,6 +215,7 @@ void synchronise_pagetables(unsigned long cpu_mask); * contiguous (or near contiguous) physical memory. */ #undef machine_to_phys_mapping + /* * The phys_to_machine_mapping is the reversed mapping of MPT for full * virtualization. 
@@ -223,12 +224,11 @@ void synchronise_pagetables(unsigned long cpu_mask); #ifdef __x86_64__ extern unsigned long *machine_to_phys_mapping; +extern unsigned long *phys_to_machine_mapping; #else #define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START) -#ifdef CONFIG_VMX #define phys_to_machine_mapping ((unsigned long *)PERDOMAIN_VIRT_START) #endif -#endif #define DEFAULT_GDT_ENTRIES (LAST_RESERVED_GDT_ENTRY+1) #define DEFAULT_GDT_ADDRESS ((unsigned long)gdt_table) diff --git a/xen/include/asm-x86/pda.h b/xen/include/asm-x86/pda.h index dcecc48f20..42fa8be9b0 100644 --- a/xen/include/asm-x86/pda.h +++ b/xen/include/asm-x86/pda.h @@ -9,7 +9,7 @@ struct x8664_pda { unsigned long kernelstack; /* TOS for current process */ unsigned long oldrsp; /* user rsp for system call */ unsigned long irqrsp; /* Old rsp for interrupts. */ - struct domain *pcurrent; /* Current process */ + struct exec_domain *pcurrent; /* Current process */ int irqcount; /* Irq nesting counter. Starts with -1 */ int cpunumber; /* Logical CPU number */ char *irqstackptr; /* top of irqstack */ diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h index 9935c9b2b6..d7bd5ec4e4 100644 --- a/xen/include/asm-x86/processor.h +++ b/xen/include/asm-x86/processor.h @@ -478,17 +478,12 @@ struct mm_struct { l1_pgentry_t *perdomain_ptes; pagetable_t pagetable; -#ifdef CONFIG_VMX - -#define SHM_full_32 (8) /* full virtualization for 32-bit */ - - pagetable_t monitor_table; - l2_pgentry_t *vpagetable; /* virtual address of pagetable */ - l2_pgentry_t *shadow_vtable; /* virtual address of shadow_table */ - l2_pgentry_t *guest_pl2e_cache; /* guest page directory cache */ - unsigned long min_pfn; /* min host physical */ - unsigned long max_pfn; /* max host physical */ -#endif + pagetable_t monitor_table; + l2_pgentry_t *vpagetable; /* virtual address of pagetable */ + l2_pgentry_t *shadow_vtable; /* virtual address of shadow_table */ + l2_pgentry_t *guest_pl2e_cache; /* guest page 
directory cache */ + unsigned long min_pfn; /* min host physical */ + unsigned long max_pfn; /* max host physical */ /* shadow mode status and controls */ unsigned int shadow_mode; /* flags to control shadow table operation */ diff --git a/xen/include/asm-x86/x86_32/current.h b/xen/include/asm-x86/x86_32/current.h index 99d7b4b0b3..42d01ee134 100644 --- a/xen/include/asm-x86/x86_32/current.h +++ b/xen/include/asm-x86/x86_32/current.h @@ -6,20 +6,20 @@ struct domain; #define STACK_RESERVED \ (sizeof(execution_context_t) + sizeof(struct domain *)) -static inline struct exec_domain * get_current(void) +static inline struct exec_domain *get_current(void) { - struct exec_domain *current; + struct exec_domain *ed; __asm__ ( "orl %%esp,%0; andl $~3,%0; movl (%0),%0" - : "=r" (current) : "0" (STACK_SIZE-4) ); - return current; + : "=r" (ed) : "0" (STACK_SIZE-4) ); + return ed; } #define current get_current() -static inline void set_current(struct exec_domain *p) +static inline void set_current(struct exec_domain *ed) { __asm__ ( "orl %%esp,%0; andl $~3,%0; movl %1,(%0)" - : : "r" (STACK_SIZE-4), "r" (p) ); + : : "r" (STACK_SIZE-4), "r" (ed) ); } static inline execution_context_t *get_execution_context(void) diff --git a/xen/include/asm-x86/x86_64/current.h b/xen/include/asm-x86/x86_64/current.h index 2ee550643b..576e19c112 100644 --- a/xen/include/asm-x86/x86_64/current.h +++ b/xen/include/asm-x86/x86_64/current.h @@ -9,18 +9,18 @@ struct domain; #define STACK_RESERVED \ (sizeof(execution_context_t)) -static inline struct domain * get_current(void) +static inline struct exec_domain *get_current(void) { - struct domain *current; - current = read_pda(pcurrent); - return current; + struct exec_domain *ed; + ed = read_pda(pcurrent); + return ed; } #define current get_current() -static inline void set_current(struct domain *p) +static inline void set_current(struct exec_domain *ed) { - write_pda(pcurrent,p); + write_pda(pcurrent, ed); } static inline execution_context_t 
*get_execution_context(void) -- 2.30.2